library(metafor) # for meta-analysis
library(clubSandwich) # for covariance matrix and robust ci estimates
library(rms) # for fitting splines
library(ellipse)
library(ggpubr)
library(mgfunctions)
library(kableExtra)
# Build the within-person quadriceps strength dataset:
# quad-based outcomes only, post-operative (<10 years), no contralateral grafts,
# classified into contraction-type subgroups.
quad <- within_data %>%
  rename(acl_graft_group = graft_group) %>%
  filter(
    str_detect(measure, "quad"),                       # all quad-based outcomes
    timepoint_mean > 2,                                # remove pre-operative data
    timepoint_mean < 120,                              # remove >10 year data
    !str_detect(graft, "contralateral|Contralateral")  # remove any contralateral grafts
  ) %>%
  mutate(
    es_id = row_number(),  # effect size id number for use in random effects
    # classify different outcomes into subgroups:
    measure_2 = case_when(
      str_detect(measure, "mvic|hhd") ~ "Isometric",
      str_detect(measure, "isk con 180|isk con 230|isk con 240|isk con 300") ~ "Fast Isokinetic",
      str_detect(measure, "isk con 30|isk con 60|isk con 90|isk con 120") ~ "Slow Isokinetic"
    )
  ) %>%
  filter(!is.na(measure_2))  # drop outcomes that fit no subgroup
# Reducing timepoint down to categories (3, 6, 12, 24, 48, 96 months post).
# Not used at the moment but keeping the code here for now.
# Break points sit midway between neighbouring category labels;
# note cut() returns a factor, hence the as.numeric(as.character(...)) below.
quad_cat <- quad %>%
mutate(timepoint_cut = cut(timepoint_mean,
breaks = c(0, 4.5, 9, 18, 36, 72, Inf),
labels = c(3, 6, 12, 24, 48, 96)))
#group_by(cohort, timepoint_cut, group, measure_2) %>%
# if multiple timepoints allocated to same category, take the closest to the categorical timepoint
#slice(which.min(abs(timepoint_mean - as.numeric(as.character(timepoint_cut))))) %>%
#ungroup()
# separate subgroups of data based on contraction type
# Some studies report multiple outcomes in the same subgroup
# Case by case removal of data
# Generally:
# for fast if they have isk con 180 use that if possible, otherwise use speed closest to this.
# for slow use isk con 60, or 90
# Fast isokinetic subset. Drop duplicate outcomes within the subgroup:
# prefer isk con 180 where available, otherwise the speed closest to it.
fastdata <- quad_cat %>%
  filter(measure_2 == "Fast Isokinetic") %>%
  filter(!paste(cohort, measure) %in% c(
    "Bailey 2019 quad isk con 300",
    "Drocco 2017 quad isk con 240",
    "Kyritsis 2016 quad isk con 300",
    "Laudner 2015 quad isk con 300",
    "Tourville 2014 quad isk con 300",
    "Welling 2020 quad isk con 300",
    "Welling 2018b quad isk con 300"
  ))
# Slow isokinetic subset. Drop duplicate outcomes within the subgroup:
# prefer isk con 60 or 90.
slowdata <- quad_cat %>%
  filter(measure_2 == "Slow Isokinetic") %>%
  filter(!paste(cohort, measure) %in% c(
    "Li 2022 quad isk con 30",
    "Siney 2010 quad isk con 90",
    "Zult 2017 quad isk con 120",
    "Pamukoff 2018 quad isk con 120"
  ))
# Isometric subset. Drop duplicate outcomes within the subgroup.
isodata <- quad_cat %>%
  filter(measure_2 == "Isometric") %>%
  filter(!paste(cohort, measure) %in% c(
    "Karanikas 2005 quad mvic 0",
    "Labanca 2018 quad mvic 30",
    "Labanca 2016 quad mvic 30",
    "Wongcharoenwatana 2019 quad mvic 90 hhd prone"
  ))
Need to create a full covariance matrix. Using the clubSandwich package to help. Assuming a correlation of 0.85 between timepoints.
# Impute full (block-diagonal by cohort) covariance matrices for each subgroup,
# assuming an AR(1) auto-correlation of 0.85 between timepoints.
fastV <- impute_covariance_matrix(
  vi = fastdata$vi,              # sampling variances
  cluster = fastdata$cohort,     # cluster is cohort (not study)
  ti = fastdata$timepoint_mean,  # timepoint driving the AR(1) correlation
  ar1 = 0.85,                    # auto-correlation between timepoints
  smooth_vi = TRUE,
  check_PD = TRUE,               # check positive definiteness afterwards
  return_list = FALSE            # return the full matrix, not a per-cluster list
)
slowV <- impute_covariance_matrix(
  vi = slowdata$vi,
  cluster = slowdata$cohort,
  ti = slowdata$timepoint_mean,
  ar1 = 0.85,
  smooth_vi = TRUE,
  check_PD = TRUE,               # stated explicitly for consistency (TRUE is the default)
  return_list = FALSE
)
isoV <- impute_covariance_matrix(
  vi = isodata$vi,
  cluster = isodata$cohort,
  ti = isodata$timepoint_mean,
  ar1 = 0.85,
  smooth_vi = TRUE,
  check_PD = TRUE,
  return_list = FALSE
)
Data are now ready for meta-analysis
Need to now decide on how to fit the models. Several different structures were trialled in piloting.
Based on piloting, the best results were achieved by fitting random effects for timepoints nested within cohorts. We trialled fitting extra random effects (e.g. effect size id, es_id), as well as separate models where study group was also a separate random effect; however, the resulting models were too complex, with indistinguishable random effects, so overall a simpler model was chosen.
Decision making here revolves around how to fit the timepoint predictor - i.e. what sort of relationship is present between yi and timepoint. Different models are generated and then the best fit is decided using fit statistics (AIC, BIC, AICc), visual inspection of fit, and the expected fit based on domain knowledge.
5 different shapes of fit tried: - linear - log - polynomial - 3 knot restricted cubic spline - 4 knot restricted cubic spline
# 'Empty' models with no timepoint moderator; the candidate timepoint
# specifications are built on these. Random effects: timepoint nested within
# cohort, with a continuous-time autoregressive (CAR) variance structure.
slow_mv_empty <- rma.mv(yi, slowV, data = slowdata,
                        random = ~ timepoint_mean | cohort,
                        struct = "CAR")
fast_mv_empty <- rma.mv(yi, fastV, data = fastdata,
                        random = ~ timepoint_mean | cohort,
                        struct = "CAR")
iso_mv_empty <- rma.mv(yi, isoV, data = isodata,
                       random = ~ timepoint_mean | cohort,
                       struct = "CAR")
4 knot spline best fit
# Compare fit statistics of the candidate timepoint shapes (slow isokinetic)
mod_selection(slow_mv_empty)
## [[1]]
## mod logLik. deviance. AIC. BIC. AICc.
## 1 Linear 150.1557 -300.3114 -292.3114 -278.2901 -292.1454
## 2 Log 190.8914 -381.7827 -373.7827 -359.7614 -373.6168
## 3 Poly (2) 165.4130 -330.8259 -320.8259 -303.3196 -320.5749
## 4 3 knot RCS 190.4994 -380.9989 -370.9989 -353.4926 -370.7478
## 5 4 knot RCS 225.2438 -450.4876 -438.4876 -417.5046 -438.1332
##
## [[2]]
4 knot spline best fit
# Compare fit statistics of the candidate timepoint shapes (fast isokinetic)
mod_selection(fast_mv_empty)
## [[1]]
## mod logLik. deviance. AIC. BIC. AICc.
## 1 Linear 83.57804 -167.1561 -159.1561 -148.8162 -158.7260
## 2 Log 92.82967 -185.6593 -177.6593 -167.3195 -177.2292
## 3 Poly (2) 88.06118 -176.1224 -166.1224 -153.2488 -165.4630
## 4 3 knot RCS 92.70016 -185.4003 -175.4003 -162.5268 -174.7410
## 5 4 knot RCS 98.19512 -196.3902 -184.3902 -169.0041 -183.4464
##
## [[2]]
Log is best fitting, however 4 knot spline appears more realistic, with marginally worse fit.
# Compare fit statistics of the candidate timepoint shapes (isometric)
mod_selection(iso_mv_empty)
## [[1]]
## mod logLik. deviance. AIC. BIC. AICc.
## 1 Linear 26.12579 -52.25159 -44.25159 -35.94144 -43.51085
## 2 Log 32.34395 -64.68791 -56.68791 -48.37776 -55.94717
## 3 Poly (2) 28.34331 -56.68662 -46.68662 -36.38441 -45.53277
## 4 3 knot RCS 30.17380 -60.34761 -50.34761 -40.04539 -49.19376
## 5 4 knot RCS 32.94939 -65.89877 -53.89877 -41.64047 -52.21877
##
## [[2]]
# Final slow isokinetic quad model: timepoint fitted as a 4-knot restricted
# cubic spline; timepoint-within-cohort random effects with CAR structure.
slow_mv <- rma.mv(yi, slowV,
                  mods = ~ rcs(timepoint_mean, 4),
                  data = slowdata,
                  random = ~ timepoint_mean | cohort,
                  struct = "CAR")
# Cluster-robust (CR2) coefficient tests and confidence intervals, by cohort
robust(slow_mv, cluster = cohort, clubSandwich = TRUE)
##
## Multivariate Meta-Analysis Model (k = 248; method: REML)
##
## Variance Components:
##
## outer factor: cohort (nlvls = 165)
## inner factor: timepoint_mean (nlvls = 106)
##
## estim sqrt fixed
## tau^2 0.0108 0.1038 no
## rho 0.9209 no
##
## Test for Residual Heterogeneity:
## QE(df = 244) = 8519.4758, p-val < .0001
##
## Number of estimates: 248
## Number of clusters: 165
## Estimates per cluster: 1-7 (mean: 1.50, median: 1)
##
## Test of Moderators (coefficients 2:4):¹
## F(df1 = 3, df2 = 31.35) = 82.1206, p-val < .0001
##
## Model Results:
##
## estimate se¹ tval¹ df¹
## intrcpt -0.7034 0.0399 -17.6451 36.59
## rcs(timepoint_mean, 4)timepoint_mean 0.0749 0.0063 11.9131 39.34
## rcs(timepoint_mean, 4)timepoint_mean' -2.2454 0.2212 -10.1528 50.52
## rcs(timepoint_mean, 4)timepoint_mean'' 3.3612 0.3348 10.0408 51.58
## pval¹ ci.lb¹ ci.ub¹
## intrcpt <.0001 -0.7842 -0.6226 ***
## rcs(timepoint_mean, 4)timepoint_mean <.0001 0.0622 0.0876 ***
## rcs(timepoint_mean, 4)timepoint_mean' <.0001 -2.6895 -1.8013 ***
## rcs(timepoint_mean, 4)timepoint_mean'' <.0001 2.6893 4.0330 ***
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## 1) results based on cluster-robust inference (var-cov estimator: CR2,
## approx t/F-tests and confidence intervals, df: Satterthwaite approx)
# Profile likelihoods to check the variance components are identifiable
profile(slow_mv)
## Profiling tau2 = 1
## Profiling rho = 1
Plots - normal and log scale
# Model-predicted recovery curve (with prediction interval), log-scale version,
# and a table of predictions at key timepoints
mv_plotdetails(slow_mv, include_pi = TRUE)
mv_plotdetails(slow_mv, logscale = TRUE, showgraft = FALSE)
predict_details(slow_mv) %>% kbl() %>% kable_styling(position = "left", full_width = FALSE)
| 1 year | 2 years | 5 years | Zero crossing | Last Data Point |
|---|---|---|---|---|
| 84.9 (83.5 to 86.3) | 87.4 (85.7 to 89.1) | 91.3 (89.2 to 93.4) | 8.8 years | 9 years |
# Final fast isokinetic quad model: 4-knot restricted cubic spline for timepoint.
fast_mv <- rma.mv(yi, fastV,
                  mods = ~ rcs(timepoint_mean, 4),
                  data = fastdata,
                  random = ~ timepoint_mean | cohort,
                  struct = "CAR")
# Cluster-robust (CR2) coefficient tests and confidence intervals, by cohort
robust(fast_mv, cluster = cohort, clubSandwich = TRUE)
##
## Multivariate Meta-Analysis Model (k = 100; method: REML)
##
## Variance Components:
##
## outer factor: cohort (nlvls = 66)
## inner factor: timepoint_mean (nlvls = 43)
##
## estim sqrt fixed
## tau^2 0.0091 0.0952 no
## rho 0.9298 no
##
## Test for Residual Heterogeneity:
## QE(df = 96) = 2868.2638, p-val < .0001
##
## Number of estimates: 100
## Number of clusters: 66
## Estimates per cluster: 1-6 (mean: 1.52, median: 1)
##
## Test of Moderators (coefficients 2:4):¹
## F(df1 = 3, df2 = 16.1) = 15.5012, p-val < .0001
##
## Model Results:
##
## estimate se¹ tval¹ df¹
## intrcpt -0.5051 0.0595 -8.4955 14.86
## rcs(timepoint_mean, 4)timepoint_mean 0.0489 0.0097 5.0487 15.72
## rcs(timepoint_mean, 4)timepoint_mean' -1.1303 0.2765 -4.0879 20.48
## rcs(timepoint_mean, 4)timepoint_mean'' 1.6862 0.4200 4.0151 20.99
## pval¹ ci.lb¹ ci.ub¹
## intrcpt <.0001 -0.6319 -0.3783 ***
## rcs(timepoint_mean, 4)timepoint_mean 0.0001 0.0283 0.0695 ***
## rcs(timepoint_mean, 4)timepoint_mean' 0.0005 -1.7062 -0.5544 ***
## rcs(timepoint_mean, 4)timepoint_mean'' 0.0006 0.8128 2.5595 ***
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## 1) results based on cluster-robust inference (var-cov estimator: CR2,
## approx t/F-tests and confidence intervals, df: Satterthwaite approx)
# Profile likelihoods to check the variance components are identifiable
profile(fast_mv)
## Profiling tau2 = 1
## Profiling rho = 1
Plots - normal and log scale
# Model-predicted recovery curve (with prediction interval), log-scale version,
# and a table of predictions at key timepoints
mv_plotdetails(fast_mv, include_pi = TRUE)
mv_plotdetails(fast_mv, logscale = TRUE, showgraft = FALSE)
predict_details(fast_mv) %>% kbl() %>% kable_styling(position = "left", full_width = FALSE)
| 1 year | 2 years | 5 years | Zero crossing | Last Data Point |
|---|---|---|---|---|
| 86.4 (84.4 to 88.5) | 88.7 (86.5 to 91) | 89.9 (84 to 96.2) | 6.5 years | 5.1 years |
# Final isometric quad model: 4-knot restricted cubic spline for timepoint
# (chosen over the marginally better-fitting log form as more realistic).
iso_mv <- rma.mv(yi, isoV,
                 mods = ~ rcs(timepoint_mean, 4),
                 data = isodata,
                 random = ~ timepoint_mean | cohort,
                 struct = "CAR")
# Cluster-robust (CR2) coefficient tests and confidence intervals, by cohort
robust(iso_mv, cluster = cohort, clubSandwich = TRUE)
##
## Multivariate Meta-Analysis Model (k = 61; method: REML)
##
## Variance Components:
##
## outer factor: cohort (nlvls = 45)
## inner factor: timepoint_mean (nlvls = 58)
##
## estim sqrt fixed
## tau^2 0.0179 0.1337 no
## rho 0.0000 no
##
## Test for Residual Heterogeneity:
## QE(df = 57) = 3768.7603, p-val < .0001
##
## Number of estimates: 61
## Number of clusters: 45
## Estimates per cluster: 1-3 (mean: 1.36, median: 1)
##
## Test of Moderators (coefficients 2:4):¹
## F(df1 = 3, df2 = 8.97) = 14.7038, p-val = 0.0008
##
## Model Results:
##
## estimate se¹ tval¹ df¹
## intrcpt -0.5719 0.0825 -6.9351 13.57
## rcs(timepoint_mean, 4)timepoint_mean 0.0560 0.0127 4.4242 18.53
## rcs(timepoint_mean, 4)timepoint_mean' -0.8114 0.2265 -3.5820 24.57
## rcs(timepoint_mean, 4)timepoint_mean'' 1.0613 0.3010 3.5261 25.03
## pval¹ ci.lb¹ ci.ub¹
## intrcpt <.0001 -0.7493 -0.3945 ***
## rcs(timepoint_mean, 4)timepoint_mean 0.0003 0.0295 0.0826 ***
## rcs(timepoint_mean, 4)timepoint_mean' 0.0015 -1.2784 -0.3445 **
## rcs(timepoint_mean, 4)timepoint_mean'' 0.0017 0.4415 1.6812 **
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## 1) results based on cluster-robust inference (var-cov estimator: CR2,
## approx t/F-tests and confidence intervals, df: Satterthwaite approx)
# Profile likelihoods to check the variance components are identifiable
profile(iso_mv)
## Profiling tau2 = 1
## Profiling rho = 1
Plots - normal and log scale
# Model-predicted recovery curve (with prediction interval), log-scale version,
# and a table of predictions at key timepoints
mv_plotdetails(iso_mv, include_pi = TRUE)
mv_plotdetails(iso_mv, logscale = TRUE, showgraft = FALSE)
predict_details(iso_mv) %>% kbl() %>% kable_styling(position = "left", full_width = FALSE)
| 1 year | 2 years | 5 years | Zero crossing | Last Data Point |
|---|---|---|---|---|
| 90.9 (85.9 to 96.2) | 92.2 (87.5 to 97) | 99.2 (87 to 113.1) | 3.6 years | 5.4 years |
##########
# Data for Hamstring
# Build the within-person hamstring strength dataset (mirrors the quad pipeline).
hs <- within_data %>%
  rename(acl_graft_group = graft_group) %>%
  filter(
    str_detect(measure, "hs"),                         # all hamstring-based outcomes
    timepoint_mean > 2,                                # remove pre-operative data
    timepoint_mean < 120,                              # remove >10 year data
    !str_detect(graft, "contralateral|Contralateral")  # remove any contralateral grafts
  ) %>%
  mutate(
    es_id = row_number(),  # effect size id number for use in random effects
    # classify different outcomes into subgroups:
    measure_2 = case_when(
      str_detect(measure, "mvic|hhd") ~ "Isometric",
      str_detect(measure, "isk con 180|isk con 230|isk con 240|isk con 300") ~ "Fast Isokinetic",
      str_detect(measure, "isk con 30|isk con 60|isk con 90|isk con 120") ~ "Slow Isokinetic"
    )
  ) %>%
  filter(!is.na(measure_2))  # drop outcomes that fit no subgroup
# Reducing timepoint down to categories (3, 6, 12, 24, 48, 96 months post)
# not using this at the moment but keeping code here for now
# Break points sit midway between neighbouring category labels;
# note cut() returns a factor, hence the as.numeric(as.character(...)) below.
hs_cat <- hs %>%
mutate(timepoint_cut = cut(timepoint_mean,
breaks = c(0, 4.5, 9, 18, 36, 72, Inf),
labels = c(3, 6, 12, 24, 48, 96)))
#group_by(cohort, timepoint_cut, measure_2) %>%
# if multiple timepoints allocated to same category, take the closest to the categorical timepoint
#slice(which.min(abs(timepoint_mean - as.numeric(as.character(timepoint_cut))))) %>%
#ungroup()
# separate subgroups of data based on contraction type
# Some studies report multiple outcomes in the same subgroup
# Case by case removal of data
# Generally:
# for fast if they have isk con 180 use that if possible, otherwise use speed closest to this.
# for slow use isk con 60, or 90
# Fast isokinetic hamstring subset. Drop duplicate outcomes within the
# subgroup: prefer isk con 180, otherwise the speed closest to it.
hs_fastdata <- hs_cat %>%
  filter(measure_2 == "Fast Isokinetic") %>%
  filter(!paste(cohort, measure) %in% c(
    "Drocco 2017 hs isk con 240",
    "Kyritsis 2016 hs isk con 300",
    "Laudner 2015 hs isk con 300",
    "Tourville 2014 hs isk con 300",
    "Welling 2020 hs isk con 300",
    "Welling 2018b hs isk con 300"
  ))
# Slow isokinetic hamstring subset. Drop duplicate outcomes within the
# subgroup: prefer isk con 60 or 90.
hs_slowdata <- hs_cat %>%
  filter(measure_2 == "Slow Isokinetic") %>%
  filter(!paste(cohort, measure) %in% c(
    "Lee 2019 hs isk con 60 - deep",
    "Kim 2011 hs isk con 90 hyperflex",
    "Siney 2010 hs isk con 90",
    "McRae 2013 hs isk con 60 supine",
    "Li 2022 hs isk con 30",
    "Jiang 2012 hs isk con 120",
    "Zult 2017 hs isk con 120"
  ))
# Isometric hamstring subset. Drop duplicate outcomes within the subgroup.
hs_isodata <- hs_cat %>%
  filter(measure_2 == "Isometric") %>%
  filter(!paste(cohort, measure) %in% c(
    "Hu 2020 hs mvic 30 hhd",
    "Hu 2020 hs mvic 60 hhd",
    "Ardern 2010 hs mvic 30",
    "Ardern 2010 hs mvic 105"
  ))
Need to create a full covariance matrix. Using the clubSandwich package to help. Assuming a correlation of 0.85 between timepoints.
# Impute full (block-diagonal by cohort) covariance matrices for each
# hamstring subgroup, assuming an AR(1) auto-correlation of 0.85 between timepoints.
hs_fastV <- impute_covariance_matrix(
  vi = hs_fastdata$vi,
  cluster = hs_fastdata$cohort,      # cluster is cohort (not study)
  ti = hs_fastdata$timepoint_mean,   # timepoint driving the AR(1) correlation
  ar1 = 0.85,
  smooth_vi = TRUE,
  check_PD = TRUE,                   # check positive definiteness afterwards
  return_list = FALSE                # return the full matrix
)
hs_slowV <- impute_covariance_matrix(
  vi = hs_slowdata$vi,
  cluster = hs_slowdata$cohort,
  ti = hs_slowdata$timepoint_mean,
  ar1 = 0.85,
  smooth_vi = TRUE,
  check_PD = TRUE,                   # stated explicitly for consistency (TRUE is the default)
  return_list = FALSE
)
hs_isoV <- impute_covariance_matrix(
  vi = hs_isodata$vi,
  cluster = hs_isodata$cohort,
  ti = hs_isodata$timepoint_mean,
  ar1 = 0.85,
  smooth_vi = TRUE,
  check_PD = TRUE,
  return_list = FALSE
)
# 'Empty' hamstring models with no timepoint moderator; the candidate timepoint
# specifications are built on these. Random effects: timepoint nested within
# cohort, with a continuous-time autoregressive (CAR) variance structure.
hs_fast_mv_empty <- rma.mv(yi, hs_fastV, data = hs_fastdata,
                           random = ~ timepoint_mean | cohort,
                           struct = "CAR")
hs_slow_mv_empty <- rma.mv(yi, hs_slowV, data = hs_slowdata,
                           random = ~ timepoint_mean | cohort,
                           struct = "CAR")
hs_iso_mv_empty <- rma.mv(yi, hs_isoV, data = hs_isodata,
                          random = ~ timepoint_mean | cohort,
                          struct = "CAR")
4 knot spline best fit
# Compare fit statistics of the candidate timepoint shapes (hs slow isokinetic)
mod_selection(hs_slow_mv_empty)
## [[1]]
## mod logLik. deviance. AIC. BIC. AICc.
## 1 Linear 160.7531 -321.5061 -313.5061 -300.5817 -313.2864
## 2 Log 172.0767 -344.1534 -336.1534 -323.2290 -335.9337
## 3 Poly (2) 162.9579 -325.9158 -315.9158 -299.7871 -315.5825
## 4 3 knot RCS 170.9374 -341.8748 -331.8748 -315.7460 -331.5414
## 5 4 knot RCS 193.1509 -386.3018 -374.3018 -354.9796 -373.8299
##
## [[2]]
4 knot spline best fit
# Compare fit statistics of the candidate timepoint shapes (hs fast isokinetic)
mod_selection(hs_fast_mv_empty)
## [[1]]
## mod logLik. deviance. AIC. BIC. AICc.
## 1 Linear 70.06780 -140.1356 -132.1356 -122.7604 -131.5800
## 2 Log 72.93478 -145.8696 -137.8696 -128.4943 -137.3140
## 3 Poly (2) 70.64831 -141.2966 -131.2966 -119.6430 -130.4395
## 4 3 knot RCS 71.29610 -142.5922 -132.5922 -120.9385 -131.7351
## 5 4 knot RCS 75.16984 -150.3397 -138.3397 -124.4348 -137.1044
##
## [[2]]
Log is the best fit; the 4 knot spline is overfit.
# Compare fit statistics of the candidate timepoint shapes (hs isometric)
mod_selection(hs_iso_mv_empty)
## [[1]]
## mod logLik. deviance. AIC. BIC. AICc.
## 1 Linear 11.84160 -23.68320 -15.68320 -9.349126 -14.39288
## 2 Log 12.53822 -25.07644 -17.07644 -10.742363 -15.78612
## 3 Poly (2) 11.13393 -22.26785 -12.26785 -4.491112 -10.19889
## 4 3 knot RCS 11.78507 -23.57014 -13.57014 -5.793402 -11.50118
## 5 4 knot RCS 16.69763 -33.39527 -21.39527 -12.237107 -18.28416
##
## [[2]]
# Final slow isokinetic hamstring model: 4-knot restricted cubic spline for timepoint.
hs_slow_mv <- rma.mv(yi, hs_slowV,
                     mods = ~ rcs(timepoint_mean, 4),
                     data = hs_slowdata,
                     random = ~ timepoint_mean | cohort,
                     struct = "CAR")
# Cluster-robust (CR2) coefficient tests and confidence intervals, by cohort
robust(hs_slow_mv, cluster = cohort, clubSandwich = TRUE)
##
## Multivariate Meta-Analysis Model (k = 189; method: REML)
##
## Variance Components:
##
## outer factor: cohort (nlvls = 127)
## inner factor: timepoint_mean (nlvls = 90)
##
## estim sqrt fixed
## tau^2 0.0071 0.0844 no
## rho 0.8207 no
##
## Test for Residual Heterogeneity:
## QE(df = 185) = 5528.5858, p-val < .0001
##
## Number of estimates: 189
## Number of clusters: 127
## Estimates per cluster: 1-7 (mean: 1.49, median: 1)
##
## Test of Moderators (coefficients 2:4):¹
## F(df1 = 3, df2 = 24.88) = 11.5255, p-val < .0001
##
## Model Results:
##
## estimate se¹ tval¹ df¹
## intrcpt -0.4196 0.0569 -7.3787 27.59
## rcs(timepoint_mean, 4)timepoint_mean 0.0529 0.0089 5.9512 32.57
## rcs(timepoint_mean, 4)timepoint_mean' -1.7377 0.2982 -5.8268 40.88
## rcs(timepoint_mean, 4)timepoint_mean'' 2.6183 0.4501 5.8176 41.63
## pval¹ ci.lb¹ ci.ub¹
## intrcpt <.0001 -0.5361 -0.3030 ***
## rcs(timepoint_mean, 4)timepoint_mean <.0001 0.0348 0.0709 ***
## rcs(timepoint_mean, 4)timepoint_mean' <.0001 -2.3400 -1.1354 ***
## rcs(timepoint_mean, 4)timepoint_mean'' <.0001 1.7098 3.5269 ***
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## 1) results based on cluster-robust inference (var-cov estimator: CR2,
## approx t/F-tests and confidence intervals, df: Satterthwaite approx)
# Profile likelihoods to check the variance components are identifiable
profile(hs_slow_mv)
## Profiling tau2 = 1
## Profiling rho = 1
Plots - normal and log scale
# Model-predicted recovery curve (with prediction interval), log-scale version,
# and a table of predictions at key timepoints
mv_plotdetails(hs_slow_mv, include_pi = TRUE)
mv_plotdetails(hs_slow_mv, logscale = TRUE, showgraft = FALSE)
predict_details(hs_slow_mv) %>% kbl() %>% kable_styling(position = "left", full_width = FALSE)
| 1 year | 2 years | 5 years | Zero crossing | Last Data Point |
|---|---|---|---|---|
| 93.8 (92.5 to 95.1) | 91.8 (90.2 to 93.4) | 92.9 (90.8 to 95.1) | 8 years | 9 years |
# Final fast isokinetic hamstring model: 4-knot restricted cubic spline for timepoint.
hs_fast_mv <- rma.mv(yi, hs_fastV,
                     mods = ~ rcs(timepoint_mean, 4),
                     data = hs_fastdata,
                     random = ~ timepoint_mean | cohort,
                     struct = "CAR")
# Cluster-robust (CR2) coefficient tests and confidence intervals, by cohort
robust(hs_fast_mv, cluster = cohort, clubSandwich = TRUE)
##
## Multivariate Meta-Analysis Model (k = 79; method: REML)
##
## Variance Components:
##
## outer factor: cohort (nlvls = 53)
## inner factor: timepoint_mean (nlvls = 40)
##
## estim sqrt fixed
## tau^2 0.0094 0.0968 no
## rho 0.9105 no
##
## Test for Residual Heterogeneity:
## QE(df = 75) = 4054.9033, p-val < .0001
##
## Number of estimates: 79
## Number of clusters: 53
## Estimates per cluster: 1-6 (mean: 1.49, median: 1)
##
## Test of Moderators (coefficients 2:4):¹
## F(df1 = 3, df2 = 10.46) = 1.7558, p-val = 0.2163
##
## Model Results:
##
## estimate se¹ tval¹ df¹
## intrcpt -0.3249 0.1020 -3.1865 11.78
## rcs(timepoint_mean, 4)timepoint_mean 0.0401 0.0163 2.4573 12.87
## rcs(timepoint_mean, 4)timepoint_mean' -0.7001 0.2906 -2.4092 16.63
## rcs(timepoint_mean, 4)timepoint_mean'' 1.0612 0.4415 2.4036 17.04
## pval¹ ci.lb¹ ci.ub¹
## intrcpt 0.0080 -0.5476 -0.1023 **
## rcs(timepoint_mean, 4)timepoint_mean 0.0290 0.0048 0.0753 *
## rcs(timepoint_mean, 4)timepoint_mean' 0.0279 -1.3142 -0.0860 *
## rcs(timepoint_mean, 4)timepoint_mean'' 0.0279 0.1299 1.9925 *
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## 1) results based on cluster-robust inference (var-cov estimator: CR2,
## approx t/F-tests and confidence intervals, df: Satterthwaite approx)
# Profile likelihoods to check the variance components are identifiable
profile(hs_fast_mv)
## Profiling tau2 = 1
## Profiling rho = 1
Plots - normal and log scale
# Model-predicted recovery curve (with prediction interval), log-scale version,
# and a table of predictions at key timepoints
mv_plotdetails(hs_fast_mv, include_pi = TRUE)
mv_plotdetails(hs_fast_mv, logscale = TRUE, showgraft = FALSE)
predict_details(hs_fast_mv) %>% kbl() %>% kable_styling(position = "left", full_width = FALSE)
| 1 year | 2 years | 5 years | Zero crossing | Last Data Point |
|---|---|---|---|---|
| 94 (92 to 96) | 92.4 (90.1 to 94.7) | 95.6 (82.9 to 110.3) | 3.6 years | 5.1 years |
# Final isometric hamstring model: log(timepoint) moderator
# (the 4-knot spline was judged overfit for this subgroup).
hs_iso_mv <- rma.mv(yi, hs_isoV,
                    mods = ~ log(timepoint_mean),
                    data = hs_isodata,
                    random = ~ timepoint_mean | cohort,
                    struct = "CAR")
# Cluster-robust (CR2) coefficient tests and confidence intervals, by cohort
robust(hs_iso_mv, cluster = cohort, clubSandwich = TRUE)
##
## Multivariate Meta-Analysis Model (k = 38; method: REML)
##
## Variance Components:
##
## outer factor: cohort (nlvls = 27)
## inner factor: timepoint_mean (nlvls = 31)
##
## estim sqrt fixed
## tau^2 0.0296 0.1720 no
## rho 0.7141 no
##
## Test for Residual Heterogeneity:
## QE(df = 36) = 4240.2591, p-val < .0001
##
## Number of estimates: 38
## Number of clusters: 27
## Estimates per cluster: 1-3 (mean: 1.41, median: 1)
##
## Test of Moderators (coefficient 2):¹
## F(df1 = 1, df2 = 14.82) = 1.4841, p-val = 0.2422
##
## Model Results:
##
## estimate se¹ tval¹ df¹ pval¹ ci.lb¹
## intrcpt -0.2836 0.1035 -2.7394 16.06 0.0145 -0.5030
## log(timepoint_mean) 0.0464 0.0381 1.2183 14.82 0.2422 -0.0349
## ci.ub¹
## intrcpt -0.0642 *
## log(timepoint_mean) 0.1278
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## 1) results based on cluster-robust inference (var-cov estimator: CR2,
## approx t/F-tests and confidence intervals, df: Satterthwaite approx)
# Profile likelihoods to check the variance components are identifiable
profile(hs_iso_mv)
## Profiling tau2 = 1
## Profiling rho = 1
Plots - normal and log scale
# Model-predicted recovery curve (with prediction interval), log-scale version,
# and a table of predictions at key timepoints
mv_plotdetails(hs_iso_mv, include_pi = TRUE)
mv_plotdetails(hs_iso_mv, logscale = TRUE, showgraft = FALSE)
predict_details(hs_iso_mv) %>% kbl() %>% kable_styling(position = "left", full_width = FALSE)
| 1 year | 2 years | 5 years | Zero crossing | Last Data Point |
|---|---|---|---|---|
| 84.5 (79.7 to 89.6) | 87.3 (80.9 to 94.2) | 91.1 (79.4 to 104.5) | 3.4 years | 5 years |
# Build the case-control quad strength dataset.
# Note: es_id is assigned BEFORE the timepoint filter here (unlike the
# within-person pipeline), and timepoint_mean is taken from the ACL side.
quadcont <- casecontrol %>%
  filter(str_detect(measure, "quad")) %>%
  mutate(
    es_id = row_number(),
    timepoint_mean = acl_timepoint_mean,
    group = 1
  ) %>%
  filter(timepoint_mean > 2,    # no pre-operative data
         timepoint_mean < 120)  # remove >10 year data
  %>% mutate(measure_2 = case_when(
    str_detect(measure, "mvic|hhd") ~ "Isometric",
    str_detect(measure, "isk con 180|isk con 230|isk con 240|isk con 300") ~ "Fast Isokinetic",
    str_detect(measure, "isk con 30|isk con 60|isk con 90|isk con 120") ~ "Slow Isokinetic"
  )) %>%
  filter(!is.na(measure_2)) %>%
  #filter(timepoint_mean < 100) %>%
  # remove where multiple effect sizes fall in the same measure group
  filter(!paste(cohort, measure) %in% c(
    "Laudner 2015 quad isk con 300",
    "Pamukoff 2018 quad isk con 120",
    "Tourville 2014 quad isk con 300",
    "Zult 2017 quad isk con 120"
  ))
# Reducing timepoint down to categories (3, 6, 12, 24, 48, 96 months post)
# not using this at the moment but keeping code here for now
# Break points sit midway between neighbouring category labels.
quadcont_cat <- quadcont %>%
mutate(timepoint_cut = cut(timepoint_mean,
breaks = c(0, 4.5, 9, 18, 36, 72, Inf),
labels = c(3, 6, 12, 24, 48, 96)))
#group_by(cohort, timepoint_cut, group, measure_2) %>%
# if multiple timepoints allocated to same category, take the closest to the categorical timepoint
#slice(which.min(abs(timepoint_mean - as.numeric(as.character(timepoint_cut))))) %>%
#ungroup()
# separate subgroups of data based on contraction type
# (no case-by-case duplicate removal needed here; handled above)
fastcont <- quadcont_cat %>% filter(measure_2 == "Fast Isokinetic")
slowcont <- quadcont_cat %>% filter(measure_2 == "Slow Isokinetic")
isocont <- quadcont_cat %>% filter(measure_2 == "Isometric")
Need to create a full covariance matrix. Using the clubSandwich package to help. Assuming a correlation of 0.85 between timepoints.
# Impute full (block-diagonal by cohort) covariance matrices for the
# case-control quad subgroups, AR(1) correlation of 0.85 between timepoints.
# NOTE(review): smooth_vi is not set here (unlike the within-person matrices,
# which use smooth_vi = TRUE) — confirm this difference is intentional.
qcont_slowV <- impute_covariance_matrix(
  vi = slowcont$vi,
  cluster = slowcont$cohort,
  ti = slowcont$timepoint_mean,
  ar1 = 0.85,
  check_PD = TRUE,
  return_list = FALSE
)
qcont_fastV <- impute_covariance_matrix(
  vi = fastcont$vi,
  cluster = fastcont$cohort,
  ti = fastcont$timepoint_mean,
  ar1 = 0.85,
  check_PD = TRUE,
  return_list = FALSE
)
qcont_isoV <- impute_covariance_matrix(
  vi = isocont$vi,
  cluster = isocont$cohort,
  ti = isocont$timepoint_mean,
  ar1 = 0.85,
  check_PD = TRUE,
  return_list = FALSE
)
Data are now ready for meta-analysis
# 'Empty' case-control quad models with no timepoint moderator; the candidate
# timepoint specifications are built on these. Random effects: timepoint
# nested within cohort, continuous-time autoregressive (CAR) structure.
qcont_slow_mv_empty <- rma.mv(yi, qcont_slowV, data = slowcont,
                              random = ~ timepoint_mean | cohort,
                              struct = "CAR")
qcont_fast_mv_empty <- rma.mv(yi, qcont_fastV, data = fastcont,
                              random = ~ timepoint_mean | cohort,
                              struct = "CAR")
qcont_iso_mv_empty <- rma.mv(yi, qcont_isoV, data = isocont,
                             random = ~ timepoint_mean | cohort,
                             struct = "CAR")
Log best fit
# Compare fit statistics of the candidate timepoint shapes (case-control slow isokinetic)
mod_selection(qcont_slow_mv_empty)
## [[1]]
## mod logLik. deviance. AIC. BIC. AICc.
## 1 Linear 12.06488 -24.12976 -16.12976 -11.765590 -13.776819
## 2 Log 13.34260 -26.68520 -18.68520 -14.321033 -16.332262
## 3 Poly (2) 11.23978 -22.47956 -12.47956 -7.256951 -8.479563
## 4 3 knot RCS 11.98789 -23.97579 -13.97579 -8.753174 -9.975787
## 5 4 knot RCS 13.01079 -26.02159 -14.02159 -8.047192 -7.560047
##
## [[2]]
Log best fit
# Compare fit statistics of the candidate timepoint shapes (case-control fast isokinetic)
mod_selection(qcont_fast_mv_empty)
## [[1]]
## mod logLik. deviance. AIC. BIC. AICc.
## 1 Linear 8.183540 -16.36708 -8.3670806 -6.107283 -3.367081
## 2 Log 8.583040 -17.16608 -9.1660803 -6.906283 -4.166080
## 3 Poly (2) 7.366976 -14.73395 -4.7339514 -2.309418 5.266049
## 4 3 knot RCS 7.207663 -14.41533 -4.4153256 -1.990792 5.584674
## 5 4 knot RCS 6.281128 -12.56226 -0.5622562 1.825115 20.437744
##
## [[2]]
Log is best fit
# Compare fit statistics of the candidate timepoint shapes (case-control isometric)
mod_selection(qcont_iso_mv_empty)
## [[1]]
## mod logLik. deviance. AIC. BIC. AICc.
## 1 Linear 5.147059 -10.29412 -2.294118 2.8892299 -0.4759357
## 2 Log 5.988042 -11.97608 -3.976083 1.2072643 -2.1579014
## 3 Poly (2) 5.882500 -11.76500 -1.765000 4.5254823 1.2349997
## 4 3 knot RCS 5.602298 -11.20460 -1.204597 5.0858859 1.7954032
## 5 4 knot RCS 9.264707 -18.52941 -6.529415 0.7838403 -1.8627480
##
## [[2]]
# Final case-control slow isokinetic quad model: log(timepoint) moderator.
qcont_slow_mv <- rma.mv(yi, qcont_slowV,
                        mods = ~ log(timepoint_mean),
                        data = slowcont,
                        random = ~ timepoint_mean | cohort,
                        struct = "CAR")
# Cluster-robust (CR2) coefficient tests and confidence intervals, by cohort
robust(qcont_slow_mv, cluster = cohort, clubSandwich = TRUE)
##
## Multivariate Meta-Analysis Model (k = 24; method: REML)
##
## Variance Components:
##
## outer factor: cohort (nlvls = 18)
## inner factor: timepoint_mean (nlvls = 22)
##
## estim sqrt fixed
## tau^2 0.0152 0.1232 no
## rho 0.6874 no
##
## Test for Residual Heterogeneity:
## QE(df = 22) = 210.5326, p-val < .0001
##
## Number of estimates: 24
## Number of clusters: 18
## Estimates per cluster: 1-4 (mean: 1.33, median: 1)
##
## Test of Moderators (coefficient 2):¹
## F(df1 = 1, df2 = 7.87) = 17.6016, p-val = 0.0031
##
## Model Results:
##
## estimate se¹ tval¹ df¹ pval¹ ci.lb¹
## intrcpt -0.5194 0.0661 -7.8558 9.65 <.0001 -0.6675
## log(timepoint_mean) 0.1210 0.0288 4.1954 7.87 0.0031 0.0543
## ci.ub¹
## intrcpt -0.3714 ***
## log(timepoint_mean) 0.1877 **
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## 1) results based on cluster-robust inference (var-cov estimator: CR2,
## approx t/F-tests and confidence intervals, df: Satterthwaite approx)
# Profile likelihoods to check the variance components are identifiable
profile(qcont_slow_mv)
## Profiling tau2 = 1
## Profiling rho = 1
Plots - normal and log scale
# Model-predicted recovery curve (with prediction interval), log-scale version,
# and a table of predictions at key timepoints
mv_plotdetails(qcont_slow_mv, include_pi = TRUE)
mv_plotdetails(qcont_slow_mv, logscale = TRUE, showgraft = FALSE)
predict_details(qcont_slow_mv) %>% kbl() %>% kable_styling(position = "left", full_width = FALSE)
| 1 year | 2 years | 5 years | Zero crossing | Last Data Point |
|---|---|---|---|---|
| 80.3 (75.1 to 85.9) | 87.4 (78.8 to 96.9) | 97.6 (83.1 to 114.7) | 2.4 years | 4.6 years |
# Final case-control fast isokinetic quad model: log(timepoint) moderator.
qcont_fast_mv <- rma.mv(yi, qcont_fastV,
                        mods = ~ log(timepoint_mean),
                        data = fastcont,
                        random = ~ timepoint_mean | cohort,
                        struct = "CAR")
# Cluster-robust (CR2) coefficient tests and confidence intervals, by cohort
robust(qcont_fast_mv, cluster = cohort, clubSandwich = TRUE)
##
## Multivariate Meta-Analysis Model (k = 15; method: REML)
##
## Variance Components:
##
## outer factor: cohort (nlvls = 11)
## inner factor: timepoint_mean (nlvls = 13)
##
## estim sqrt fixed
## tau^2 0.0124 0.1112 no
## rho 0.0000 no
##
## Test for Residual Heterogeneity:
## QE(df = 13) = 116.5288, p-val < .0001
##
## Number of estimates: 15
## Number of clusters: 11
## Estimates per cluster: 1-4 (mean: 1.36, median: 1)
##
## Test of Moderators (coefficient 2):¹
## F(df1 = 1, df2 = 4.85) = 5.4999, p-val = 0.0675
##
## Model Results:
##
## estimate se¹ tval¹ df¹ pval¹ ci.lb¹
## intrcpt -0.3707 0.0664 -5.5795 4 0.0051 -0.5552
## log(timepoint_mean) 0.0580 0.0247 2.3452 4.85 0.0675 -0.0062
## ci.ub¹
## intrcpt -0.1862 **
## log(timepoint_mean) 0.1221 .
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## 1) results based on cluster-robust inference (var-cov estimator: CR2,
## approx t/F-tests and confidence intervals, df: Satterthwaite approx)
# Profile likelihoods to check the variance components are identifiable
profile(qcont_fast_mv)
## Profiling tau2 = 1
## Profiling rho = 1
Plots - normal and log scale
# Model-predicted recovery curve (with prediction interval), log-scale version,
# and a table of predictions at key timepoints
mv_plotdetails(qcont_fast_mv, include_pi = TRUE)
mv_plotdetails(qcont_fast_mv, logscale = TRUE, showgraft = FALSE)
predict_details(qcont_fast_mv) %>% kbl() %>% kable_styling(position = "left", full_width = FALSE)
| 1 year | 2 years | 5 years | Zero crossing | Last Data Point |
|---|---|---|---|---|
| 79.7 (73.6 to 86.3) | 83 (75.3 to 91.4) | 87.5 (75.8 to 101.1) | 4.5 years | 4.9 years |
# Final case-control isometric quad model: log(timepoint) moderator.
qcont_iso_mv <- rma.mv(yi, qcont_isoV,
                       mods = ~ log(timepoint_mean),
                       data = isocont,
                       random = ~ timepoint_mean | cohort,
                       struct = "CAR")
# Cluster-robust (CR2) coefficient tests and confidence intervals, by cohort
robust(qcont_iso_mv, cluster = cohort, clubSandwich = TRUE)
##
## Multivariate Meta-Analysis Model (k = 29; method: REML)
##
## Variance Components:
##
## outer factor: cohort (nlvls = 27)
## inner factor: timepoint_mean (nlvls = 28)
##
## estim sqrt fixed
## tau^2 0.0337 0.1836 no
## rho 0.0000 no
##
## Test for Residual Heterogeneity:
## QE(df = 27) = 720.8710, p-val < .0001
##
## Number of estimates: 29
## Number of clusters: 27
## Estimates per cluster: 1-2 (mean: 1.07, median: 1)
##
## Test of Moderators (coefficient 2):¹
## F(df1 = 1, df2 = 12.03) = 5.1565, p-val = 0.0423
##
## Model Results:
##
## estimate se¹ tval¹ df¹ pval¹ ci.lb¹
## intrcpt -0.3777 0.0830 -4.5520 7.56 0.0022 -0.5710
## log(timepoint_mean) 0.0583 0.0257 2.2708 12.03 0.0423 0.0024
## ci.ub¹
## intrcpt -0.1844 **
## log(timepoint_mean) 0.1142 *
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## 1) results based on cluster-robust inference (var-cov estimator: CR2,
## approx t/F-tests and confidence intervals, df: Satterthwaite approx)
# Profile likelihoods to check the variance components are identifiable
profile(qcont_iso_mv)
## Profiling tau2 = 1
## Profiling rho = 1
Plots - normal and log scale
# Model-predicted recovery curve (with prediction interval), log-scale version,
# and a table of predictions at key timepoints
mv_plotdetails(qcont_iso_mv, include_pi = TRUE)
mv_plotdetails(qcont_iso_mv, logscale = TRUE, showgraft = FALSE)
predict_details(qcont_iso_mv) %>% kbl() %>% kable_styling(position = "left", full_width = FALSE)
| 1 year | 2 years | 5 years | Zero crossing | Last Data Point |
|---|---|---|---|---|
| 79.2 (73.9 to 85) | 82.5 (77.6 to 87.7) | 87 (79.9 to 94.7) | 8.7 years | 4.5 years |
# Build the case-control hamstring strength dataset.
# Note: es_id is assigned BEFORE the timepoint filter, and timepoint_mean is
# taken from the ACL side (mirrors the case-control quad pipeline).
hscont <- casecontrol %>%
  filter(str_detect(measure, "hs")) %>%
  mutate(
    es_id = row_number(),
    timepoint_mean = acl_timepoint_mean,
    group = 1
  ) %>%
  filter(timepoint_mean > 2,    # no pre-operative data
         timepoint_mean < 120) %>%
  mutate(measure_2 = case_when(
    str_detect(measure, "mvic|hhd") ~ "Isometric",
    str_detect(measure, "isk con 180|isk con 230|isk con 240|isk con 300") ~ "Fast Isokinetic",
    str_detect(measure, "isk con 30|isk con 60|isk con 90|isk con 120") ~ "Slow Isokinetic"
  )) %>%
  filter(!is.na(measure_2)) %>%
  # NOTE(review): extra < 100 month cut applied here but commented out in the
  # case-control quad pipeline — confirm the difference is intentional
  filter(timepoint_mean < 100) %>%
  # remove where multiple effect sizes fall in the same measure group
  filter(!paste(cohort, measure) %in% c(
    "Laudner 2015 hs isk con 300",
    "Tourville 2014 hs isk con 300",
    "Zult 2017 hs isk con 120"
  ))
# Reducing timepoint down to categories (3, 6, 12, 24, 48, 96 months post)
# not using this at the moment but keeping code here for now
hscont_cat <- hscont %>%
mutate(timepoint_cut = cut(timepoint_mean,
breaks = c(0, 4.5, 9, 18, 36, 72, Inf),
labels = c(3, 6, 12, 24, 48, 96)))
#group_by(cohort, timepoint_cut, measure_2) %>%
# if multiple timepoints allocated to same category, take the closest to the categorical timepoint
#slice(which.min(abs(timepoint_mean - as.numeric(as.character(timepoint_cut))))) %>%
#ungroup()
# Subset the between-person hamstring data by contraction type
hs_fastcont <- filter(hscont_cat, measure_2 == "Fast Isokinetic")
hs_slowcont <- filter(hscont_cat, measure_2 == "Slow Isokinetic")
hs_isocont <- filter(hscont_cat, measure_2 == "Isometric")
Need to create a full covariance matrix. Using the clubSandwich package to help, assuming a correlation of 0.85 between timepoints.
# Impute full variance-covariance matrices for each subgroup with clubSandwich,
# assuming an AR(1) correlation of 0.85 between timepoints within a cohort.
# check_PD = TRUE (clubSandwich's default) is now stated explicitly in all
# three calls for consistency — the "slow" call previously omitted it. It only
# triggers a positive-definiteness check/warning; imputed values are unchanged.
hcont_fastV <- impute_covariance_matrix(vi = hs_fastcont$vi,
                                        cluster = hs_fastcont$cohort,
                                        ti = hs_fastcont$timepoint_mean,
                                        ar1 = 0.85,
                                        check_PD = TRUE,
                                        smooth_vi = TRUE,
                                        return_list = FALSE)
hcont_slowV <- impute_covariance_matrix(vi = hs_slowcont$vi,
                                        cluster = hs_slowcont$cohort,
                                        ti = hs_slowcont$timepoint_mean,
                                        ar1 = 0.85,
                                        check_PD = TRUE,
                                        smooth_vi = TRUE,
                                        return_list = FALSE)
hcont_isoV <- impute_covariance_matrix(vi = hs_isocont$vi,
                                       cluster = hs_isocont$cohort,
                                       ti = hs_isocont$timepoint_mean,
                                       ar1 = 0.85,
                                       check_PD = TRUE,
                                       smooth_vi = TRUE,
                                       return_list = FALSE)
# Create 'empty' models without timepoint used as a moderator variable.
# Will use these models to then build different versions on
# Using a continuous time auto-regressive variance structure (CAR)
# (random effects: timepoints within cohort)
hcont_slow_mv_empty <- rma.mv(yi, hcont_slowV,
data = hs_slowcont,
random = list(~ timepoint_mean|cohort),
struct = c("CAR"))
hcont_fast_mv_empty <- rma.mv(yi, hcont_fastV,
data = hs_fastcont,
random = list(~ timepoint_mean|cohort),
struct = c("CAR"))
hcont_iso_mv_empty <- rma.mv(yi, hcont_isoV,
data = hs_isocont,
random = list(~ timepoint_mean|cohort),
struct = c("CAR"))
Log is the best fit.
# Compare candidate time trends (linear, log, polynomial, splines) via fit statistics
mod_selection(hcont_slow_mv_empty)
## [[1]]
## mod logLik. deviance. AIC. BIC. AICc.
## 1 Linear 13.20571 -26.41141 -18.41141 -15.32106 -14.775050
## 2 Log 15.04066 -30.08131 -22.08131 -18.99096 -18.444947
## 3 Poly (2) 12.31979 -24.63957 -14.63957 -11.09932 -7.972908
## 4 3 knot RCS 16.94020 -33.88040 -23.88040 -20.34015 -17.213733
## 5 4 knot RCS 18.36443 -36.72885 -24.72885 -20.89451 -12.728854
##
## [[2]]
Log is the best fit.
# Compare candidate time trends (linear, log, polynomial, splines) via fit statistics
mod_selection(hcont_fast_mv_empty)
## [[1]]
## mod logLik. deviance. AIC. BIC. AICc.
## 1 Linear 3.937956 -7.875911 0.1240888 1.715670 6.790755
## 2 Log 4.052934 -8.105868 -0.1058677 1.485713 6.560799
## 3 Poly (2) 3.162075 -6.324150 3.6758500 5.188775 18.675850
## 4 3 knot RCS 3.483855 -6.967710 3.0322903 4.545216 18.032290
## 5 4 knot RCS 3.827806 -7.655612 2.3443884 3.857314 17.344388
##
## [[2]]
Polynomial (2) is the best fit. A lot of uncertainty here.
# Compare candidate time trends (linear, log, polynomial, splines) via fit statistics
mod_selection(hcont_iso_mv_empty)
## [[1]]
## mod logLik. deviance. AIC. BIC. AICc.
## 1 Linear 2.465917 -4.931833 3.0681665 5.007793 8.782452
## 2 Log 1.821243 -3.642486 4.3575138 6.297140 10.071800
## 3 Poly (2) 4.590334 -9.180668 0.8193318 2.808808 12.819332
## 4 3 knot RCS 4.417871 -8.835742 1.1642579 3.153734 13.164258
## 5 4 knot RCS 5.223635 -10.447270 1.5527296 3.368240 29.552730
##
## [[2]]
# Final slow-isokinetic model: log(time) moderator, CAR random effects
hcont_slow_mv <- rma.mv(yi, hcont_slowV,
mods = ~log(timepoint_mean),
data = hs_slowcont,
random = list(~ timepoint_mean|cohort),
struct = c("CAR"))
# Cluster-robust (CR2) coefficient tests and confidence intervals
robust(hcont_slow_mv, cluster = cohort, clubSandwich = TRUE)
##
## Multivariate Meta-Analysis Model (k = 18; method: REML)
##
## Variance Components:
##
## outer factor: cohort (nlvls = 12)
## inner factor: timepoint_mean (nlvls = 17)
##
## estim sqrt fixed
## tau^2 0.0092 0.0962 no
## rho 0.8840 no
##
## Test for Residual Heterogeneity:
## QE(df = 16) = 97.2809, p-val < .0001
##
## Number of estimates: 18
## Number of clusters: 12
## Estimates per cluster: 1-4 (mean: 1.50, median: 1)
##
## Test of Moderators (coefficient 2):¹
## F(df1 = 1, df2 = 4.32) = 23.6758, p-val = 0.0068
##
## Model Results:
##
## estimate se¹ tval¹ df¹ pval¹ ci.lb¹
## intrcpt -0.2900 0.0624 -4.6463 6.36 0.0030 -0.4407
## log(timepoint_mean) 0.0983 0.0202 4.8658 4.32 0.0068 0.0438
## ci.ub¹
## intrcpt -0.1393 **
## log(timepoint_mean) 0.1528 **
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## 1) results based on cluster-robust inference (var-cov estimator: CR2,
## approx t/F-tests and confidence intervals, df: Satterthwaite approx)
# Profile-likelihood check of the variance components (tau^2, rho)
profile(hcont_slow_mv)
## Profiling tau2 = 1
## Profiling rho = 1
Plots - normal and log scale
# Plot model predictions with prediction intervals (natural then log time scale)
mv_plotdetails(hcont_slow_mv, include_pi = TRUE)
mv_plotdetails(hcont_slow_mv, logscale = TRUE, showgraft = FALSE)
# Tabulate model predictions at key timepoints
predict_details(hcont_slow_mv) %>% kbl() %>% kable_styling(position = "left", full_width = FALSE)
| 1 year | 2 years | 5 years | Zero crossing | Last Data Point |
|---|---|---|---|---|
| 95.5 (88.7 to 102.9) | 102.3 (92.7 to 112.8) | 111.9 (97.7 to 128.1) | 0.7 years | 4.6 years |
# Final fast-isokinetic model: log(time) moderator, CAR random effects
hcont_fast_mv <- rma.mv(yi, hcont_fastV,
mods = ~log(timepoint_mean),
data = hs_fastcont,
random = list(~ timepoint_mean|cohort),
struct = c("CAR"))
# Cluster-robust (CR2) coefficient tests and confidence intervals
robust(hcont_fast_mv, cluster = cohort, clubSandwich = TRUE)
##
## Multivariate Meta-Analysis Model (k = 13; method: REML)
##
## Variance Components:
##
## outer factor: cohort (nlvls = 9)
## inner factor: timepoint_mean (nlvls = 12)
##
## estim sqrt fixed
## tau^2 0.0326 0.1804 no
## rho 0.9154 no
##
## Test for Residual Heterogeneity:
## QE(df = 11) = 102.0693, p-val < .0001
##
## Number of estimates: 13
## Number of clusters: 9
## Estimates per cluster: 1-4 (mean: 1.44, median: 1)
##
## Test of Moderators (coefficient 2):¹
## F(df1 = 1, df2 = 2.56) = 19.9575, p-val = 0.0290
##
## Model Results:
##
## estimate se¹ tval¹ df¹ pval¹ ci.lb¹
## intrcpt -0.3128 0.0693 -4.5148 3.29 0.0166 -0.5227
## log(timepoint_mean) 0.0900 0.0202 4.4674 2.56 0.0290 0.0192
## ci.ub¹
## intrcpt -0.1030 *
## log(timepoint_mean) 0.1609 *
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## 1) results based on cluster-robust inference (var-cov estimator: CR2,
## approx t/F-tests and confidence intervals, df: Satterthwaite approx)
# Profile-likelihood check of the variance components (tau^2, rho)
profile(hcont_fast_mv)
## Profiling tau2 = 1
## Profiling rho = 1
Plots - normal and log scale
# Plot model predictions with prediction intervals (natural then log time scale)
mv_plotdetails(hcont_fast_mv, include_pi = TRUE)
mv_plotdetails(hcont_fast_mv, logscale = TRUE, showgraft = FALSE)
# Tabulate model predictions at key timepoints
predict_details(hcont_fast_mv) %>% kbl() %>% kable_styling(position = "left", full_width = FALSE)
| 1 year | 2 years | 5 years | Zero crossing | Last Data Point |
|---|---|---|---|---|
| 91.5 (79.5 to 105.3) | 97.4 (79.3 to 119.6) | 105.7 (80.3 to 139.3) | 0.5 years | 4.3 years |
# Final isometric model: quadratic (raw) polynomial time moderator, CAR random effects
hcont_iso_mv <- rma.mv(yi, hcont_isoV,
mods = ~poly(timepoint_mean, degree = 2, raw = TRUE),
data = hs_isocont,
random = list(~ timepoint_mean|cohort),
struct = c("CAR"))
# Cluster-robust (CR2) coefficient tests and confidence intervals
robust(hcont_iso_mv, cluster = cohort, clubSandwich = TRUE)
##
## Multivariate Meta-Analysis Model (k = 14; method: REML)
##
## Variance Components:
##
## outer factor: cohort (nlvls = 12)
## inner factor: timepoint_mean (nlvls = 13)
##
## estim sqrt fixed
## tau^2 0.0238 0.1544 no
## rho 0.6188 no
##
## Test for Residual Heterogeneity:
## QE(df = 11) = 168.2278, p-val < .0001
##
## Number of estimates: 14
## Number of clusters: 12
## Estimates per cluster: 1-2 (mean: 1.17, median: 1)
##
## Test of Moderators (coefficients 2:3):¹
## F(df1 = 2, df2 = 4.37) = 15.8317, p-val = 0.0100
##
## Model Results:
##
## estimate se¹ tval¹
## intrcpt -0.3123 0.1578 -1.9789
## poly(timepoint_mean, degree = 2, raw = TRUE)1 0.0433 0.0170 2.5415
## poly(timepoint_mean, degree = 2, raw = TRUE)2 -0.0013 0.0004 -3.3905
## df¹ pval¹ ci.lb¹
## intrcpt 4.81 0.1070 -0.7229
## poly(timepoint_mean, degree = 2, raw = TRUE)1 6.05 0.0437 0.0017
## poly(timepoint_mean, degree = 2, raw = TRUE)2 5.61 0.0163 -0.0022
## ci.ub¹
## intrcpt 0.0984
## poly(timepoint_mean, degree = 2, raw = TRUE)1 0.0848 *
## poly(timepoint_mean, degree = 2, raw = TRUE)2 -0.0003 *
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## 1) results based on cluster-robust inference (var-cov estimator: CR2,
## approx t/F-tests and confidence intervals, df: Satterthwaite approx)
# Profile-likelihood check of the variance components (tau^2, rho)
profile(hcont_iso_mv)
## Profiling tau2 = 1
## Profiling rho = 1
Plots - normal and log scale
# Plot model predictions with prediction intervals (natural then log time scale)
mv_plotdetails(hcont_iso_mv, include_pi = TRUE)
mv_plotdetails(hcont_iso_mv, logscale = TRUE, showgraft = FALSE)
# Tabulate model predictions at key timepoints
predict_details(hcont_iso_mv) %>% kbl() %>% kable_styling(position = "left", full_width = FALSE)
| 1 year | 2 years | 5 years | Zero crossing | Last Data Point |
|---|---|---|---|---|
| 102.1 (89.3 to 116.7) | 98.1 (84.8 to 113.5) | 9.3 (2.4 to 35.5) | 2.3 years | 3 years |
Of interest is whether studies that compare both between-person and within-person comparisons show similar effects — i.e. are the effects different? For this we select all the datapoints that have both between- and within-person comparisons and conduct a bivariate meta-analysis. We have to account for non-independence of samples in the analysis by constructing a variance/covariance matrix and also fitting random effects (type of outcome: within/between person, nested within cohorts). To calculate this we assumed a rho of 0.7; sensitivity analyses show that estimates are stable across different values.
Because the majority of studies report only between person comparisons, we chose to only look at those studies that measured both (i.e. “bivariate”), otherwise estimation of the correlation between effects will be spurious.
Set up:
## Bivariate Analyses
## Investigating relationship between within and between person estimates
# Within Person Data
# ('group' renamed to acl_group so columns match the between-person frame)
quad1 <- quad %>%
select(study, cohort, measure, measure_2, acl_n, timepoint, timepoint_mean, group, acl_graft_group, yi, vi) %>%
rename(acl_group = group) %>%
mutate(type = "within")
# Between person data
quad2 <- quadcont %>%
select(study, cohort, measure, measure_2, acl_n, timepoint, timepoint_mean, acl_group, acl_graft_group, yi, vi) %>%
mutate(type = "casecontrol")
# Join together (rows stacked; 'type' distinguishes the two effect kinds)
quad_joined <- bind_rows(quad1, quad2)
# Split based on contraction types
# For each subgroup: case-by-case removal of duplicate measures within a
# cohort, then keep only study/measure/timepoint combinations reported for
# BOTH effect types, then one effect per study (closest to 12 months), and
# finally drop studies left with only one of the two effect types.
slow_joined <- quad_joined %>% filter(measure_2 == "Slow Isokinetic") %>%
filter(!(cohort == "Li 2022" & measure == "quad isk con 30"),
!(cohort == "Siney 2010" & measure == "quad isk con 90")) %>%
group_by(study, measure, timepoint) %>%
filter(length(type) == 2) %>% # make sure to only include studies who measure both effects
ungroup() %>%
group_by(study, measure, type) %>%
slice(which.min(abs(timepoint_mean - as.numeric(12)))) %>% # take one effect per study, preferably closest to 12 months if multiple
ungroup() %>%
filter(!is.na(vi)) %>% # remove any missing data
group_by(study) %>%
filter(n() > 1) %>% # keep only studies contributing both effect types
ungroup()
# Fast isokinetic: same inclusion logic
fast_joined <- quad_joined %>% filter(measure_2 == "Fast Isokinetic") %>%
filter(!(cohort == "Bailey 2019" & measure == "quad isk con 300"),
!(cohort == "Drocco 2017" & measure == "quad isk con 240"),
!(cohort == "Kyritsis 2016" & measure == "quad isk con 300"),
!(cohort == "Laudner 2015" & measure == "quad isk con 300"),
!(cohort == "Pamukoff 2018" & measure == "quad isk con 120"),
!(cohort == "Tourville 2014" & measure == "quad isk con 300"),
!(cohort == "Welling 2020" & measure == "quad isk con 300"),
!(cohort == "Welling 2018b" & measure == "quad isk con 300"),
!(cohort == "Zult 2017" & measure == "quad isk con 120")) %>%
group_by(study, measure, timepoint) %>%
filter(length(type) == 2) %>%
ungroup() %>%
group_by(study, measure, type) %>%
slice(which.min(abs(timepoint_mean - as.numeric(12)))) %>%
ungroup() %>%
filter(!is.na(vi)) %>% # remove any missing data
group_by(study) %>%
filter(n() > 1) %>%
ungroup()
# Isometric: same inclusion logic
iso_joined <- quad_joined %>% filter(measure_2 == "Isometric") %>%
filter(!(cohort == "Karanikas 2005" & measure == "quad mvic 0"),
!(cohort == "Labanca 2018" & measure == "quad mvic 30"),
!(cohort == "Labanca 2016" & measure == "quad mvic 30")) %>%
group_by(study, measure, timepoint) %>%
filter(length(type) == 2) %>%
ungroup() %>%
group_by(study, measure, type) %>%
slice(which.min(abs(timepoint_mean - as.numeric(12)))) %>%
ungroup() %>%
filter(!is.na(vi)) %>% # remove any missing data
group_by(study) %>%
filter(n() > 1) %>%
ungroup()
# Create covariance matrices for each, assuming rho of 0.7
# (correlation between within- and between-person effects from the same study)
slowjoinedV <- vcalc(vi,
cluster = study,
type = type,
rho = 0.7,
data = slow_joined,
checkpd = TRUE)
fastjoinedV <- vcalc(vi,
cluster = study,
type = type,
rho = 0.7,
data = fast_joined,
checkpd = TRUE)
isojoinedV <- vcalc(vi,
cluster = study,
type = type,
rho = 0.7,
data = iso_joined,
checkpd = TRUE)
# Fit individual models with random effects for type of effect, nested within each cohort.
# No intercept, so each 'type' gets its own pooled estimate; UN structure
# gives a separate tau^2 per type plus the correlation between them.
slow_joined_mv <- rma.mv(yi,
slowjoinedV,
mods = ~ type - 1, # remove intercept to generate estimate for each.
random = list(~ type | cohort),
struct = c("UN"),
data = slow_joined,
cvvc = "varcov") # need this to use later with matreg
fast_joined_mv <- rma.mv(yi,
fastjoinedV,
mods = ~ type - 1,
random = list(~ type | cohort),
struct = c("UN"),
data = fast_joined,
cvvc = "varcov")
iso_joined_mv <- rma.mv(yi,
isojoinedV,
mods = ~ type - 1,
random = list(~ type | cohort),
struct = c("UN"),
data = iso_joined,
cvvc = "varcov")
####
## Hamstring
####
# Within-person hamstring data ('group' renamed so columns match hs2)
hs1 <- hs %>%
select(study, cohort, measure, measure_2, acl_n, timepoint, timepoint_mean, group, acl_graft_group, yi, vi) %>%
rename(acl_group = group) %>%
mutate(type = "within")
# Between-person hamstring data
hs2 <- hscont %>%
select(study, cohort, measure, measure_2, acl_n, timepoint, timepoint_mean, acl_group, acl_graft_group, yi, vi) %>%
mutate(type = "casecontrol")
# Stack the two; 'type' distinguishes the effect kinds
hs_joined <- bind_rows(hs1, hs2)
# Hamstring subgroups: case-by-case removal of duplicate measures within a
# cohort, keep only study/measure/timepoint combinations reported for BOTH
# effect types, one effect per study (closest to 12 months), then drop
# studies left with only one of the two effect types.
slow_joined_hs <- hs_joined %>% filter(measure_2 == "Slow Isokinetic") %>%
filter(!(cohort == "Lee 2019" & measure == "hs isk con 60 - deep"),
!(cohort == "Kim 2011" & measure == "hs isk con 90 hyperflex"),
!(cohort == "Siney 2010" & measure == "hs isk con 90"),
!(cohort == "McRae 2013" & measure == "hs isk con 60 supine"),
!(cohort == "Li 2022" & measure == "hs isk con 30")) %>%
group_by(study, measure, timepoint) %>%
filter(length(type) == 2) %>% # only studies measuring both effects
ungroup() %>%
group_by(study, measure, type) %>%
slice(which.min(abs(timepoint_mean - as.numeric(12)))) %>% # one effect per study, closest to 12 months
ungroup() %>%
filter(!is.na(vi)) %>% # remove any missing data
group_by(study) %>%
filter(n() > 1) %>% # keep only studies contributing both effect types
ungroup()
# Fast isokinetic hamstring: same inclusion logic
fast_joined_hs <- hs_joined %>% filter(measure_2 == "Fast Isokinetic") %>%
filter(!(cohort == "Drocco 2017" & measure == "hs isk con 240"),
!(cohort == "Kyritsis 2016" & measure == "hs isk con 300"),
!(cohort == "Laudner 2015" & measure == "hs isk con 300"),
!(cohort == "Tourville 2014" & measure == "hs isk con 300"),
!(cohort == "Welling 2020" & measure == "hs isk con 300"),
!(cohort == "Welling 2018b" & measure == "hs isk con 300")) %>%
group_by(study, measure, timepoint) %>%
filter(length(type) == 2) %>%
ungroup() %>%
group_by(study, measure, type) %>%
slice(which.min(abs(timepoint_mean - as.numeric(12)))) %>%
ungroup() %>%
filter(!is.na(vi)) %>% # remove any missing data
group_by(study) %>%
filter(n() > 1) %>%
ungroup()
# Isometric hamstring: same inclusion logic
iso_joined_hs <- hs_joined %>% filter(measure_2 == "Isometric") %>%
filter(!(cohort == "Hu 2020" & measure == "hs mvic 30 hhd"),
!(cohort == "Hu 2020" & measure == "hs mvic 60 hhd"),
!(cohort == "Ardern 2010" & measure == "hs mvic 30"),
!(cohort == "Ardern 2010" & measure == "hs mvic 105")) %>%
group_by(study, measure, timepoint) %>%
filter(length(type) == 2) %>%
ungroup() %>%
group_by(study, measure, type) %>%
slice(which.min(abs(timepoint_mean - as.numeric(12)))) %>%
ungroup() %>%
filter(!is.na(vi)) %>% # remove any missing data
group_by(study) %>%
filter(n() > 1) %>%
ungroup()
# Covariance matrices for the hamstring bivariate data (rho = 0.7 between
# within- and between-person effects from the same study)
slowjoined_hsV <- vcalc(vi,
cluster = study,
type = type,
rho = 0.7,
data = slow_joined_hs,
checkpd = TRUE)
fastjoined_hsV <- vcalc(vi,
cluster = study,
type = type,
rho = 0.7,
data = fast_joined_hs,
checkpd = TRUE)
isojoined_hsV <- vcalc(vi,
cluster = study,
type = type,
rho = 0.7,
data = iso_joined_hs,
checkpd = TRUE)
# Bivariate models: no intercept so each 'type' gets its own pooled estimate;
# UN structure gives a separate tau^2 per type plus their correlation
slow_joined_hs_mv <- rma.mv(yi,
slowjoined_hsV,
mods = ~ type - 1,
random = list(~ type | cohort),
struct = c("UN"),
data = slow_joined_hs,
cvvc = "varcov")
fast_joined_hs_mv <- rma.mv(yi,
fastjoined_hsV,
mods = ~ type - 1,
random = list(~ type | cohort),
struct = c("UN"),
data = fast_joined_hs,
cvvc = "varcov")
iso_joined_hs_mv <- rma.mv(yi,
isojoined_hsV,
mods = ~ type - 1,
random = list(~ type | cohort),
struct = c("UN"),
data = iso_joined_hs,
cvvc = "varcov")
For model simplicity we only selected one estimate per study (i.e. one timepoint), as almost all between person studies only measure one timepoint. For the few studies that report multiple, we took the timepoint closest to 12 months (most common timepoint)
The resulting analysis provides an estimate for both between and within person estimates. More interestingly we also get an estimate of the correlation between effects.
We can then regress the true effects from the model to see the relationship between the two outcomes
In general we can say that between person effects don’t always mirror within person effects
# Slow isokinetic quadriceps: pooled within- vs between-person estimates
summary(slow_joined_mv)
##
## Multivariate Meta-Analysis Model (k = 32; method: REML)
##
## logLik Deviance AIC BIC AICc
## 18.2483 -36.4967 -26.4967 -19.4907 -23.9967
##
## Variance Components:
##
## outer factor: cohort (nlvls = 16)
## inner factor: type (nlvls = 2)
##
## estim sqrt k.lvl fixed level
## tau^2.1 0.0277 0.1665 16 no casecontrol
## tau^2.2 0.0181 0.1347 16 no within
##
## rho.cscn rho.wthn cscn wthn
## casecontrol 1 - 16
## within 0.7132 1 no -
##
## Test for Residual Heterogeneity:
## QE(df = 30) = 969.4979, p-val < .0001
##
## Test of Moderators (coefficients 1:2):
## QM(df = 2) = 30.9018, p-val < .0001
##
## Model Results:
##
## estimate se zval pval ci.lb ci.ub
## typecasecontrol -0.2124 0.0433 -4.9016 <.0001 -0.2973 -0.1275 ***
## typewithin -0.1811 0.0341 -5.3171 <.0001 -0.2478 -0.1143 ***
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Relationship between the two effect types (regression of true effects; output shows the fitted coefficients)
corplot_function(slow_joined_mv)
## [[1]]
##
## estimate se zval pval ci.lb ci.ub
## intrcpt 0.3681 0.1312 2.8055 0.0050 0.1109 0.6252 **
## casecontrol 0.5767 0.1622 3.5546 0.0004 0.2587 0.8946 ***
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
##
## [[2]]
# Fast isokinetic quadriceps: pooled within- vs between-person estimates
summary(fast_joined_mv)
##
## Multivariate Meta-Analysis Model (k = 20; method: REML)
##
## logLik Deviance AIC BIC AICc
## 15.0261 -30.0522 -20.0522 -15.6003 -15.0522
##
## Variance Components:
##
## outer factor: cohort (nlvls = 10)
## inner factor: type (nlvls = 2)
##
## estim sqrt k.lvl fixed level
## tau^2.1 0.0125 0.1119 10 no casecontrol
## tau^2.2 0.0080 0.0894 10 no within
##
## rho.cscn rho.wthn cscn wthn
## casecontrol 1 - 10
## within 0.4530 1 no -
##
## Test for Residual Heterogeneity:
## QE(df = 18) = 298.7612, p-val < .0001
##
## Test of Moderators (coefficients 1:2):
## QM(df = 2) = 34.7953, p-val < .0001
##
## Model Results:
##
## estimate se zval pval ci.lb ci.ub
## typecasecontrol -0.1969 0.0396 -4.9678 <.0001 -0.2746 -0.1192 ***
## typewithin -0.1499 0.0292 -5.1350 <.0001 -0.2071 -0.0927 ***
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Relationship between the two effect types (regression of true effects; output shows the fitted coefficients)
corplot_function(fast_joined_mv)
## [[1]]
##
## estimate se zval pval ci.lb ci.ub
## intrcpt 0.5636 0.2420 2.3287 0.0199 0.0892 1.0379 *
## casecontrol 0.3619 0.2947 1.2282 0.2194 -0.2156 0.9395
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
##
## [[2]]
# Isometric quadriceps: pooled within- vs between-person estimates
summary(iso_joined_mv)
##
## Multivariate Meta-Analysis Model (k = 28; method: REML)
##
## logLik Deviance AIC BIC AICc
## 14.8694 -29.7387 -19.7387 -13.4483 -16.7387
##
## Variance Components:
##
## outer factor: cohort (nlvls = 14)
## inner factor: type (nlvls = 2)
##
## estim sqrt k.lvl fixed level
## tau^2.1 0.0340 0.1843 14 no casecontrol
## tau^2.2 0.0238 0.1542 14 no within
##
## rho.cscn rho.wthn cscn wthn
## casecontrol 1 - 14
## within 0.7957 1 no -
##
## Test for Residual Heterogeneity:
## QE(df = 26) = 830.8786, p-val < .0001
##
## Test of Moderators (coefficients 1:2):
## QM(df = 2) = 18.5663, p-val < .0001
##
## Model Results:
##
## estimate se zval pval ci.lb ci.ub
## typecasecontrol -0.2103 0.0517 -4.0707 <.0001 -0.3116 -0.1091 ***
## typewithin -0.1692 0.0417 -4.0597 <.0001 -0.2508 -0.0875 ***
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Relationship between the two effect types (regression of true effects; output shows the fitted coefficients)
corplot_function(iso_joined_mv)
## [[1]]
##
## estimate se zval pval ci.lb ci.ub
## intrcpt 0.3051 0.1234 2.4719 0.0134 0.0632 0.5470 *
## casecontrol 0.6655 0.1523 4.3692 <.0001 0.3670 0.9640 ***
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
##
## [[2]]
# Slow isokinetic hamstring: pooled within- vs between-person estimates
summary(slow_joined_hs_mv)
##
## Multivariate Meta-Analysis Model (k = 20; method: REML)
##
## logLik Deviance AIC BIC AICc
## 27.8370 -55.6741 -45.6741 -41.2222 -40.6741
##
## Variance Components:
##
## outer factor: cohort (nlvls = 10)
## inner factor: type (nlvls = 2)
##
## estim sqrt k.lvl fixed level
## tau^2.1 0.0062 0.0787 10 no casecontrol
## tau^2.2 0.0007 0.0269 10 no within
##
## rho.cscn rho.wthn cscn wthn
## casecontrol 1 - 10
## within 0.2382 1 no -
##
## Test for Residual Heterogeneity:
## QE(df = 18) = 128.1188, p-val < .0001
##
## Test of Moderators (coefficients 1:2):
## QM(df = 2) = 32.7972, p-val < .0001
##
## Model Results:
##
## estimate se zval pval ci.lb ci.ub
## typecasecontrol -0.0215 0.0280 -0.7676 0.4427 -0.0763 0.0333
## typewithin -0.0548 0.0098 -5.5996 <.0001 -0.0740 -0.0356 ***
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Relationship between the two effect types (regression of true effects; output shows the fitted coefficients)
corplot_function(slow_joined_hs_mv)
## [[1]]
##
## estimate se zval pval ci.lb ci.ub
## intrcpt 0.8669 0.1422 6.0956 <.0001 0.5882 1.1457 ***
## casecontrol 0.0815 0.1453 0.5607 0.5750 -0.2033 0.3663
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
##
## [[2]]
# Fast isokinetic hamstring: pooled within- vs between-person estimates
summary(fast_joined_hs_mv)
##
## Multivariate Meta-Analysis Model (k = 14; method: REML)
##
## logLik Deviance AIC BIC AICc
## 11.0240 -22.0480 -12.0480 -9.6235 -2.0480
##
## Variance Components:
##
## outer factor: cohort (nlvls = 7)
## inner factor: type (nlvls = 2)
##
## estim sqrt k.lvl fixed level
## tau^2.1 0.0459 0.2144 7 no casecontrol
## tau^2.2 0.0061 0.0780 7 no within
##
## rho.cscn rho.wthn cscn wthn
## casecontrol 1 - 7
## within 0.8629 1 no -
##
## Test for Residual Heterogeneity:
## QE(df = 12) = 167.9627, p-val < .0001
##
## Test of Moderators (coefficients 1:2):
## QM(df = 2) = 6.4110, p-val = 0.0405
##
## Model Results:
##
## estimate se zval pval ci.lb ci.ub
## typecasecontrol -0.0686 0.0832 -0.8239 0.4100 -0.2316 0.0945
## typewithin -0.0593 0.0304 -1.9506 0.0511 -0.1188 0.0003 .
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Relationship between the two effect types (regression of true effects; output shows the fitted coefficients)
corplot_function(fast_joined_hs_mv)
## [[1]]
##
## estimate se zval pval ci.lb ci.ub
## intrcpt 0.6493 0.0850 7.6421 <.0001 0.4827 0.8158 ***
## casecontrol 0.3140 0.0910 3.4512 0.0006 0.1357 0.4923 ***
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
##
## [[2]]
# Isometric hamstring: pooled within- vs between-person estimates
summary(iso_joined_hs_mv)
##
## Multivariate Meta-Analysis Model (k = 16; method: REML)
##
## logLik Deviance AIC BIC AICc
## 13.6682 -27.3365 -17.3365 -14.1412 -9.8365
##
## Variance Components:
##
## outer factor: cohort (nlvls = 8)
## inner factor: type (nlvls = 2)
##
## estim sqrt k.lvl fixed level
## tau^2.1 0.0353 0.1879 8 no casecontrol
## tau^2.2 0.0016 0.0401 8 no within
##
## rho.cscn rho.wthn cscn wthn
## casecontrol 1 - 8
## within 0.5220 1 no -
##
## Test for Residual Heterogeneity:
## QE(df = 14) = 170.0779, p-val < .0001
##
## Test of Moderators (coefficients 1:2):
## QM(df = 2) = 39.2397, p-val < .0001
##
## Model Results:
##
## estimate se zval pval ci.lb ci.ub
## typecasecontrol -0.0902 0.0698 -1.2923 0.1962 -0.2270 0.0466
## typewithin -0.0965 0.0164 -5.8691 <.0001 -0.1288 -0.0643 ***
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Relationship between the two effect types (regression of true effects; output shows the fitted coefficients)
corplot_function(iso_joined_hs_mv)
## [[1]]
##
## estimate se zval pval ci.lb ci.ub
## intrcpt 0.8063 0.0766 10.5209 <.0001 0.6561 0.9565 ***
## casecontrol 0.1113 0.0839 1.3273 0.1844 -0.0531 0.2757
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
##
## [[2]]
# Within-person eccentric strength data (quadriceps and hamstring)
ecc_within <- within_data %>%
filter(!study %in% c("Tengman 2014b", "Hogberg 2023")) %>%
rename(acl_timepoint_mean = timepoint_mean) %>%
mutate(n_acl_2 = as.character(as.integer(acl_n))) %>% # sample size as whole-number text — presumably for labelling; TODO confirm
filter(str_detect(measure, "ecc"),
str_detect(measure, "quad|hs")) %>%
mutate(measure_2 = case_when(
# fast pattern is evaluated first, so "300" is not also caught by "30" below
str_detect(measure, "150|180|230|240|300") ~ "Fast Isokinetic",
str_detect(measure, "30|60|90") ~ "Slow Isokinetic"
)) %>%
mutate(measure_3 = case_when(
acl_timepoint_mean <= 12 & measure_2 == "Slow Isokinetic" ~ "Slow Isokinetic <= 1 year",
acl_timepoint_mean > 12 & measure_2 == "Slow Isokinetic" ~ "Slow Isokinetic > 1 year",
TRUE ~ measure_2),
measure_3 = factor(measure_3, levels = c("Slow Isokinetic <= 1 year", "Slow Isokinetic > 1 year",
"Fast Isokinetic"))
)
ecc_within$sei[ecc_within$study == "Gauthier 2022"] <- 0.01209894 # variance missing for one study; taking value from other similar study (Raoul)
Only 6 studies report eccentric quadriceps outcomes, mostly for slow eccentric, at a variety of timepoints.
library(meta) # use meta package to fit univariate meta-analyses (for nicer plots)
# Univariate meta-analysis of eccentric quadriceps outcomes (ratio of means),
# subgrouped by contraction speed
quad_ecc_meta <- ecc_within %>%
filter(str_detect(measure, "quad")) %>%
metagen(TE = yi, seTE = sei, studlab = study, data = ., subgroup = measure_2, sm = "ROM")
summary(quad_ecc_meta)
## ROM 95%-CI %W(common) %W(random)
## Harput 2018 0.8287 [0.7905; 0.8687] 19.9 24.7
## Engelen-VanMelick 2017 0.9844 [0.9521; 1.0179] 39.4 25.9
## Heijne 2015 0.9300 [0.8901; 0.9716] 23.0 25.0
## Heijne 2015 0.9600 [0.9132; 1.0092] 17.7 24.4
## measure_2
## Harput 2018 Slow Isokinetic
## Engelen-VanMelick 2017 Slow Isokinetic
## Heijne 2015 Slow Isokinetic
## Heijne 2015 Fast Isokinetic
##
## Number of studies combined: k = 4
##
## ROM 95%-CI z p-value
## Common effect model 0.9348 [0.9154; 0.9546] -6.29 < 0.0001
## Random effects model 0.9245 [0.8582; 0.9959] -2.07 0.0386
##
## Quantifying heterogeneity:
## tau^2 = 0.0053 [0.0014; 0.0800]; tau = 0.0726 [0.0369; 0.2829]
## I^2 = 91.5% [81.5%; 96.1%]; H = 3.44 [2.32; 5.08]
##
## Test of heterogeneity:
## Q d.f. p-value
## 35.44 3 < 0.0001
##
## Results for subgroups (common effect model):
## k ROM 95%-CI Q I^2
## measure_2 = Slow Isokinetic 3 0.9295 [0.9082; 0.9512] 34.12 94.1%
## measure_2 = Fast Isokinetic 1 0.9600 [0.9132; 1.0092] 0.00 --
##
## Test for subgroup differences (common effect model):
## Q d.f. p-value
## Between groups 1.33 1 0.2496
## Within groups 34.12 2 < 0.0001
##
## Results for subgroups (random effects model):
## k ROM 95%-CI tau^2 tau
## measure_2 = Slow Isokinetic 3 0.9130 [0.8268; 1.0082] 0.0072 0.0850
## measure_2 = Fast Isokinetic 1 0.9600 [0.9132; 1.0092] -- --
##
## Test for subgroup differences (random effects model):
## Q d.f. p-value
## Between groups 0.79 1 0.3754
##
## Details on meta-analytical method:
## - Inverse variance method
## - Restricted maximum-likelihood estimator for tau^2
## - Q-Profile method for confidence interval of tau^2 and tau
# Forest plot by subgroup (no pooled overall estimate shown)
forest_cc_function(quad_ecc_meta, overall = FALSE, prediction.subgroup = TRUE, weight.study = "same", hetstat = FALSE)
11 studies report eccentric hamstring outcomes, mostly for slow eccentric, at a variety of timepoints.
# Univariate meta-analysis of eccentric hamstring outcomes (ratio of means),
# subgrouped by speed and follow-up time (measure_3)
hs_ecc_meta <- ecc_within %>%
filter(str_detect(measure, "hs")) %>%
metagen(TE = yi, seTE = sei, studlab = study, data = ., subgroup = measure_3, sm = "ROM")
summary(hs_ecc_meta)
## ROM 95%-CI %W(common) %W(random)
## Joseph 2020 0.8848 [0.8539; 0.9168] 5.9 9.4
## Raoul 2019 0.8390 [0.8198; 0.8587] 13.8 9.6
## Harput 2018 0.8735 [0.8537; 0.8937] 14.2 9.6
## Engelen-VanMelick 2017 1.0333 [1.0011; 1.0666] 7.4 9.5
## Alt 2022 0.9013 [0.8823; 0.9208] 16.3 9.6
## Alt 2022 0.9008 [0.8820; 0.9200] 16.8 9.6
## Gauthier 2022 0.8700 [0.8496; 0.8909] 13.2 9.6
## Mesnard 2022 0.7636 [0.7326; 0.7958] 4.3 9.3
## Raoul 2018 0.6000 [0.4991; 0.7212] 0.2 5.3
## Heijne 2015 0.9150 [0.8753; 0.9565] 3.8 9.3
## Heijne 2015 0.9550 [0.9157; 0.9960] 4.2 9.3
## measure_3
## Joseph 2020 Slow Isokinetic <= 1 year
## Raoul 2019 Slow Isokinetic <= 1 year
## Harput 2018 Slow Isokinetic <= 1 year
## Engelen-VanMelick 2017 Slow Isokinetic > 1 year
## Alt 2022 Slow Isokinetic > 1 year
## Alt 2022 Fast Isokinetic
## Gauthier 2022 Slow Isokinetic <= 1 year
## Mesnard 2022 Slow Isokinetic <= 1 year
## Raoul 2018 Slow Isokinetic <= 1 year
## Heijne 2015 Slow Isokinetic > 1 year
## Heijne 2015 Fast Isokinetic
##
## Number of studies combined: k = 11
##
## ROM 95%-CI z p-value
## Common effect model 0.8878 [0.8802; 0.8955] -27.08 < 0.0001
## Random effects model 0.8727 [0.8197; 0.9291] -4.26 < 0.0001
##
## Quantifying heterogeneity:
## tau^2 = 0.0105 [0.0054; 0.0555]; tau = 0.1026 [0.0732; 0.2355]
## I^2 = 95.0% [92.8%; 96.6%]; H = 4.49 [3.72; 5.41]
##
## Test of heterogeneity:
## Q d.f. p-value
## 201.44 10 < 0.0001
##
## Results for subgroups (common effect model):
## k ROM 95%-CI Q I^2
## measure_3 = Slow Isokinetic <= 1 year 6 0.8534 [0.8433; 0.8637] 54.35 90.8%
## measure_3 = Slow Isokinetic > 1 year 3 0.9371 [0.9218; 0.9527] 50.50 96.0%
## measure_3 = Fast Isokinetic 2 0.9114 [0.8944; 0.9287] 5.94 83.2%
##
## Test for subgroup differences (common effect model):
## Q d.f. p-value
## Between groups 90.66 2 < 0.0001
## Within groups 110.78 8 < 0.0001
##
## Results for subgroups (random effects model):
## k ROM 95%-CI tau^2 tau
## measure_3 = Slow Isokinetic <= 1 year 6 0.8168 [0.7493; 0.8904] 0.0105 0.1026
## measure_3 = Slow Isokinetic > 1 year 3 0.9481 [0.8705; 1.0327] 0.0054 0.0735
## measure_3 = Fast Isokinetic 2 0.9248 [0.8736; 0.9790] 0.0014 0.0377
##
## Test for subgroup differences (random effects model):
## Q d.f. p-value
## Between groups 7.12 2 0.0284
##
## Details on meta-analytical method:
## - Inverse variance method
## - Restricted maximum-likelihood estimator for tau^2
## - Q-Profile method for confidence interval of tau^2 and tau
# Forest plot by subgroup (no pooled overall estimate shown)
forest_cc_function(hs_ecc_meta, overall = FALSE, prediction.subgroup = TRUE, weight.study = "same", hetstat = FALSE)
Only 3 studies report between-person comparisons for eccentric, across quite varied timepoints. No meta-analysis run. Plot here to show effects.
# Pool across all contraction types: one effect per study and type,
# keeping the first measure in alphabetical order after the case-by-case
# removals below.
joined <- quad_joined %>%
filter(!(cohort == "Li 2022" & measure == "quad isk con 30"),
!(cohort == "Siney 2010" & measure == "quad isk con 90")) %>%
filter(!(cohort == "Bailey 2019" & measure == "quad isk con 300"),
!(cohort == "Drocco 2017" & measure == "quad isk con 240"),
!(cohort == "Kyritsis 2016" & measure == "quad isk con 300"),
!(cohort == "Laudner 2015" & measure == "quad isk con 300"),
!(cohort == "Pamukoff 2018" & measure == "quad isk con 120"),
!(cohort == "Tourville 2014" & measure == "quad isk con 300"),
!(cohort == "Welling 2020" & measure == "quad isk con 300"),
!(cohort == "Welling 2018b" & measure == "quad isk con 300"),
!(cohort == "Zult 2017" & measure == "quad isk con 120")) %>%
filter(!(cohort == "Karanikas 2005" & measure == "quad mvic 0"),
!(cohort == "Labanca 2018" & measure == "quad mvic 30"),
!(cohort == "Labanca 2016" & measure == "quad mvic 30")) %>%
group_by(study, measure, timepoint) %>%
filter(length(type) == 2) %>% # make sure to only include studies who measure both effects
ungroup() %>%
group_by(study, measure, type) %>%
slice(which.min(abs(timepoint_mean - as.numeric(12)))) %>% # take one effect per study, preferably closest to 12 months if multiple
ungroup() %>%
mutate(measure_2 = factor(measure_2, levels = c("Slow Isokinetic", "Isometric", "Fast Isokinetic"))) %>%
group_by(study, type) %>%
arrange(study, measure) %>% # NOTE(review): ordered by the raw 'measure' string, not by the measure_2 factor defined above — confirm intended priority
mutate(row = row_number()) %>%
filter(row == 1) %>% # keep the first measure per study/type
select(-row) %>%
ungroup() %>%
filter(!is.na(vi)) %>% # remove any missing data
group_by(study) %>%
filter(n() > 1) %>% # keep only studies contributing both effect types
ungroup()
# Covariance matrix and bivariate model (as above, rho = 0.7; UN random
# effects for type, here nested within study rather than cohort)
joinedV <- vcalc(vi,
cluster = study,
type = type,
rho = 0.7,
data = joined,
checkpd = TRUE)
joined_mv <- rma.mv(yi,
joinedV,
mods = ~ type - 1, # remove intercept to generate estimate for each.
random = list(~ type | study),
struct = c("UN"),
data = joined,
cvvc = "varcov") # need this to use later with matreg
# Split based on contraction types
# Pair within-person and case-control hamstring effects: keep only studies
# reporting BOTH effect types, one effect per study/type, preferring the
# timepoint closest to 12 months post-surgery.
hs_joined <- hs_joined %>%
  # Case-by-case removal where a study reports multiple outcomes per subgroup
  filter(!(cohort == "Lee 2019" & measure == "hs isk con 60 - deep"),
         !(cohort == "Kim 2011" & measure == "hs isk con 90 hyperflex"),
         !(cohort == "Siney 2010" & measure == "hs isk con 90"),
         !(cohort == "McRae 2013" & measure == "hs isk con 60 supine"),
         !(cohort == "Li 2022" & measure == "hs isk con 30")) %>%
  filter(!(cohort == "Drocco 2017" & measure == "hs isk con 240"),
         !(cohort == "Kyritsis 2016" & measure == "hs isk con 300"),
         !(cohort == "Laudner 2015" & measure == "hs isk con 300"),
         !(cohort == "Tourville 2014" & measure == "hs isk con 300"),
         !(cohort == "Welling 2020" & measure == "hs isk con 300"),
         !(cohort == "Welling 2018b" & measure == "hs isk con 300")) %>%
  filter(!(cohort == "Hu 2020" & measure == "hs mvic 30 hhd"),
         !(cohort == "Hu 2020" & measure == "hs mvic 60 hhd"),
         !(cohort == "Ardern 2010" & measure == "hs mvic 30"),
         !(cohort == "Ardern 2010" & measure == "hs mvic 105")) %>%
  group_by(study, measure, timepoint) %>%
  filter(n() == 2) %>% # make sure to only include studies who measure both effects
  ungroup() %>%
  group_by(study, measure, type) %>%
  slice(which.min(abs(timepoint_mean - 12))) %>% # take one effect per study, preferably closest to 12 months if multiple
  ungroup() %>%
  mutate(measure_2 = factor(measure_2, levels = c("Slow Isokinetic", "Isometric", "Fast Isokinetic"))) %>%
  # If a study still has several measures per type, keep the first by measure name
  group_by(study, type) %>%
  arrange(study, measure) %>%
  slice(1) %>%
  ungroup() %>%
  filter(!is.na(vi)) %>% # remove any missing data
  # A study must contribute both effect types to remain in the joint analysis
  group_by(study) %>%
  filter(n() > 1) %>%
  ungroup()
# Approximate the covariance of sampling errors between the two effect
# types reported by the same study, assuming a correlation of 0.7.
hs_joinedV <- vcalc(
  vi,
  data = hs_joined,
  cluster = study, # effects are clustered within study
  type = type,     # the two effect types within a cluster covary
  rho = 0.7,       # assumed sampling correlation between types
  checkpd = TRUE   # verify the resulting matrix is positive definite
)
# Multivariate meta-analysis estimating the within-person and case-control
# pooled effects jointly; the intercept is removed so each effect type gets
# its own coefficient.
hs_joined_mv <- rma.mv(yi,
                       hs_joinedV,
                       mods = ~ type - 1,       # remove intercept to generate estimate for each.
                       random = ~ type | study, # a single formula; list() wrapper unnecessary
                       struct = "UN",           # unstructured: separate tau^2 per type plus rho
                       data = hs_joined,
                       cvvc = "varcov")         # need this to use later with matreg
# Quadriceps model: pooled within-person and case-control estimates
summary(joined_mv)
##
## Multivariate Meta-Analysis Model (k = 54; method: REML)
##
## logLik Deviance AIC BIC AICc
## 34.2578 -68.5157 -58.5157 -48.7594 -57.2113
##
## Variance Components:
##
## outer factor: study (nlvls = 27)
## inner factor: type (nlvls = 2)
##
## estim sqrt k.lvl fixed level
## tau^2.1 0.0276 0.1662 27 no casecontrol
## tau^2.2 0.0188 0.1372 27 no within
##
## rho.cscn rho.wthn cscn wthn
## casecontrol 1 - 27
## within 0.7920 1 no -
##
## Test for Residual Heterogeneity:
## QE(df = 52) = 1452.7544, p-val < .0001
##
## Test of Moderators (coefficients 1:2):
## QM(df = 2) = 48.1098, p-val < .0001
##
## Model Results:
##
## estimate se zval pval ci.lb ci.ub
## typecasecontrol -0.2131 0.0335 -6.3614 <.0001 -0.2787 -0.1474 ***
## typewithin -0.1790 0.0268 -6.6891 <.0001 -0.2315 -0.1266 ***
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Relationship between the two effect types for quadriceps — presumably
# regresses case-control on within-person estimates via matreg, given
# cvvc = "varcov" was set in the model above; TODO confirm in mgfunctions
corplot_function(joined_mv)
## [[1]]
##
## estimate se zval pval ci.lb ci.ub
## intrcpt 0.3077 0.0898 3.4260 0.0006 0.1317 0.4837 ***
## casecontrol 0.6538 0.1111 5.8827 <.0001 0.4360 0.8717 ***
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
##
## [[2]]
# Hamstrings model: pooled within-person and case-control estimates
summary(hs_joined_mv)
##
## Multivariate Meta-Analysis Model (k = 38; method: REML)
##
## logLik Deviance AIC BIC AICc
## 35.8751 -71.7502 -61.7502 -53.8326 -59.7502
##
## Variance Components:
##
## outer factor: study (nlvls = 19)
## inner factor: type (nlvls = 2)
##
## estim sqrt k.lvl fixed level
## tau^2.1 0.0279 0.1671 19 no casecontrol
## tau^2.2 0.0030 0.0552 19 no within
##
## rho.cscn rho.wthn cscn wthn
## casecontrol 1 - 19
## within 0.6631 1 no -
##
## Test for Residual Heterogeneity:
## QE(df = 36) = 449.4270, p-val < .0001
##
## Test of Moderators (coefficients 1:2):
## QM(df = 2) = 28.0679, p-val < .0001
##
## Model Results:
##
## estimate se zval pval ci.lb ci.ub
## typecasecontrol -0.0643 0.0400 -1.6060 0.1083 -0.1427 0.0142
## typewithin -0.0652 0.0135 -4.8395 <.0001 -0.0915 -0.0388 ***
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Relationship between the two effect types for hamstrings — presumably
# regresses case-control on within-person estimates via matreg, given
# cvvc = "varcov" was set in the model above; TODO confirm in mgfunctions
corplot_function(hs_joined_mv)
## [[1]]
##
## estimate se zval pval ci.lb ci.ub
## intrcpt 0.7316 0.0649 11.2662 <.0001 0.6043 0.8588 ***
## casecontrol 0.2190 0.0692 3.1625 0.0016 0.0833 0.3547 **
##
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
##
## [[2]]